In this report, we reproduce the analyses from fMRI Study 1.

prep data

First, we load the relevant packages, define functions and plotting aesthetics, and load and tidy the data.

load packages

# Load (and install if missing) the CRAN packages used in this report.
# Note: purrr is already part of the tidyverse but listing it is harmless.
library(pacman)
pacman::p_load(tidyverse, purrr, fs, knitr, lmerTest, ggeffects, kableExtra, boot, devtools, install = TRUE)

# emo is GitHub-only; install it once rather than re-installing (and hitting
# the network) on every knit
if (!requireNamespace("emo", quietly = TRUE)) {
  devtools::install_github("hadley/emo")
}

define functions

# geom_flat_violin() for raincloud-style plots (not on CRAN; sourced from a public gist)
source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")

# MLM results table function
# MLM results table function
#
# Format a fitted (g)lmer model as a publication-style fixed-effects table.
#   model_data: a fitted model object accepted by broom.mixed::tidy()
#   print:      if TRUE (default), return a styled kable for the report;
#               if FALSE, return the underlying data frame
table_model = function(model_data, print = TRUE) {
  table = model_data %>%
    broom.mixed::tidy(conf.int = TRUE) %>%
    filter(effect == "fixed") %>%      # keep fixed effects only
    rename("SE" = std.error,
           "t" = statistic,
           "p" = p.value) %>%
    select(-group, -effect) %>%
    # round all numeric columns except `term` and the p-value column
    # (across() replaces the superseded mutate_at())
    mutate(across(c(-contains("term"), -contains("p")), ~ round(.x, 2))) %>%
    # relabel model terms for display; the "cond" prefix is stripped first,
    # so condself/condother become self/other here
    mutate(term = gsub("cond", "", term),
           term = gsub("\\(Intercept\\)", "intercept", term),
           term = gsub("siteUSA", "sample (USA)", term),
           term = gsub("self_referential", "self-referential", term),
           term = gsub("self_relevance", "self-relevance", term),
           term = gsub("social_relevance", "social relevance", term),
           term = gsub(":", " x ", term),   # interaction separator
           # APA-style p values: "< .001" floor, no leading zero otherwise
           p = ifelse(p < .001, "< .001",
               ifelse(p == 1, "1.000", gsub("0.(.*)", ".\\1", sprintf("%.3f", p)))),
           `b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", estimate, conf.low, conf.high)) %>%
    select(term, `b [95% CI]`, df, t, p)
  
  if (isTRUE(print)) {
    table  %>%
      kable() %>%
      kableExtra::kable_styling()
  } else {
    table
  }
}

# Report simple slopes or condition contrasts of `var` at levels of `moderator`.
#
#   model:      a fitted model supported by emmeans
#   var:        focal predictor; a continuous trend when continuous = TRUE,
#               otherwise a factor contrasted against its "control" level
#   moderator:  variable at whose levels the slope/contrast is evaluated
#   continuous: TRUE -> emmeans::emtrends() slopes; FALSE -> revpairwise
#               contrasts vs. control (unadjusted)
#
# NOTE(review): this assumes emmeans returns asymptotic CI columns
# (asymp.LCL / asymp.UCL); lmer models can instead return lower.CL / upper.CL
# depending on the df method — confirm against the models used here.
simple_slopes = function(model, var, moderator, continuous = TRUE) {
  
  if (isTRUE(continuous)) {
    # simple slope of the continuous predictor at each moderator level
    table = emmeans::emtrends(model, as.formula(paste("~", moderator)), var = var) %>%
      data.frame() %>%
      rename("trend" = 2) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", trend, asymp.LCL, asymp.UCL)) %>%
      select(!!moderator, `b [95% CI]`)
    
  } else {
    # pairwise contrasts against the control condition within each moderator level
    table = confint(emmeans::contrast(emmeans::emmeans(model, as.formula(paste("~", var, "|", moderator))), "revpairwise", by = moderator, adjust = "none")) %>%
      data.frame() %>%
      filter(grepl("control", contrast)) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, asymp.LCL, asymp.UCL)) %>%
      select(contrast, !!moderator, `b [95% CI]`) %>%
      arrange(contrast)
  }
  
  # shared formatting step (previously duplicated in both branches)
  table %>%
    kable() %>%
    kableExtra::kable_styling()
}

define aesthetics

# color palettes keyed by the display labels used throughout the report
palette_condition = c("self" = "#ee9b00",
                      "control" = "#bb3e03",
                      "other" = "#005f73")
palette_roi = c("self-referential" = "#ee9b00",
               "mentalizing" = "#005f73")
palette_dv = c("self-relevance" = "#ee9b00",
               "social relevance" = "#005f73",
               "sharing" = "#56282D")
palette_sample = c("Netherlands" = "#027EA1",
                 "USA" = "#334456")

# shared ggplot theme added to every figure
# NOTE(review): the "Futura Medium" font must be available on the rendering
# machine, or text will fall back to the default family
plot_aes = theme_minimal() +
  theme(legend.position = "top",
        legend.text = element_text(size = 12),
        text = element_text(size = 16, family = "Futura Medium"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text = element_text(color = "black"),
        axis.line = element_line(colour = "black"),
        axis.ticks.y = element_blank())

load and tidy data

# trial-level data for study 1 (one row per participant x trial x ROI atlas)
merged_all = read.csv("../data/study1_data.csv")

# exclude flagged outlier trials (keeping trials with no outlier flag) and the
# two ROIs of interest, then standardize parameter estimates within
# participant and ROI (divide by SD; mean is not centered)
# NOTE(review): no ungroup() here, so `merged` remains grouped by pID and
# atlas — downstream chunks appear to rely on or tolerate this; confirm
merged = merged_all %>%
  filter(outlier == "no" | is.na(outlier)) %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  group_by(pID, atlas) %>%
  mutate(parameter_estimate_std = parameter_estimate / sd(parameter_estimate, na.rm = TRUE)) 

# wide format: one row per trial, ROIs as columns
merged_wide = merged %>%
  select(pID, site, trial, cond, value, self_relevance, social_relevance, atlas, parameter_estimate_std) %>%
  spread(atlas, parameter_estimate_std) %>%
  rename("self_referential" = `self-referential`)

quality check

Check the data quality and identify missing data

check number of participants

# count unique participants per scanning site
# (distinct() + count() replaces select/group_by/unique/summarize;
# ungroup() first so any upstream grouping cannot leak into the counts)
merged_wide %>%
  ungroup() %>%
  distinct(pID, site) %>%
  count(site, name = "n") %>%
  arrange(n) %>%
  rename("sample" = site) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
sample n
Netherlands 40
USA 45

check number of trials

Print participant IDs who have < 72 trials

# list participants with incomplete runs (< 72 trials), fewest trials first
merged_wide %>%
  group_by(pID) %>%
  tally() %>%
  filter(n < 72) %>%
  arrange(n) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
pID n
BPP65 59
BPP52 62
BPP21 63
BPA23 64
BPA34 65
BPP05 66
BPA45 67
BPP61 67
BPA47 68
BPP64 68
BPA04 69
BPA29 69
BPP56 69
BPA12 70
BPP20 70
BPP58 70
BPA02 71
BPA05 71
BPA08 71
BPA16 71
BPA31 71
BPA32 71
BPA33 71
BPA35 71
BPA37 71
BPA38 71
BPA46 71
BPP22 71
BPP60 71
BPP67 71

check missing response data

Print participant IDs who have > 0 missing responses

# list participants with any missing sharing-intention responses,
# most missing first
merged_wide %>%
  filter(is.na(value)) %>%
  group_by(pID) %>%
  tally() %>%
  arrange(desc(n)) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
pID n
BPA10 12
BPA35 12
BPP21 10
BPA45 9
BPA12 8
BPA33 4
BPP60 3
BPP20 2
BPP26 2
BPP56 2
BPP66 2
BPA02 1
BPA03 1
BPA04 1
BPA08 1
BPA27 1
BPA32 1
BPP12 1
BPP15 1
BPP29 1
BPP33 1
BPP47 1
BPP49 1
BPP65 1

check global signal

These plots show the data before outlier exclusion.

all trials

# raincloud plot of trial-level global signal by condition
# (flat violin + jittered points + boxplot, flipped horizontal)
merged_all %>%
  ggplot(aes("", global_mean, fill = cond)) +
  geom_flat_violin(position = position_nudge(x = .15, y = 0), color = FALSE, alpha = .5) +
  coord_flip() +
  geom_point(aes(color = cond), position = position_jitter(width = .05), size = .1, alpha = .2) + 
  geom_boxplot(width = .1, outlier.shape = NA, color = "black", position = position_dodge(.15)) +
  scale_fill_manual(values = palette_condition) +
  scale_color_manual(values = palette_condition) +
  scale_x_discrete(expand = c(0, .1)) +
  labs(x = "") + 
  plot_aes

individual averages

# same raincloud plot, but on participant-level means per condition
merged_all %>%
  group_by(pID, cond) %>%
  summarize(global_mean = mean(global_mean, na.rm = TRUE)) %>%
  ggplot(aes("", global_mean, fill = cond)) +
  geom_flat_violin(position = position_nudge(x = .15, y = 0), color = FALSE, alpha = .5) +
  coord_flip() +
  geom_point(aes(color = cond), position = position_jitter(width = .05), size = 1, alpha = .5) + 
  geom_boxplot(width = .1, outlier.shape = NA, color = "black", position = position_dodge(.15)) +
  scale_fill_manual(values = palette_condition) +
  scale_color_manual(values = palette_condition) +
  scale_x_discrete(expand = c(0, .1)) +
  labs(x = "") + 
  plot_aes

number of outliers

# tabulate outlier vs. non-outlier trials and compute the percent excluded
# NOTE(review): trials with outlier == NA would form a third (NA) column here
merged_all %>%
  group_by(outlier) %>%
  summarize(n = n()) %>%
  spread(outlier, n) %>%
  mutate(percent = round((yes / (yes + no)) * 100, 1))



descriptives

Summarize means, SDs, and correlations between the ROIs

ratings

# mean and SD of each behavioral rating (sharing intention stored in `value`)
# case_when() replaces the original nested ifelse(); mapping is identical,
# with anything other than the two relevance ratings labeled "sharing intention"
merged_wide %>%
  gather(variable, value, value, self_relevance, social_relevance) %>%
  group_by(variable) %>%
  summarize(M = mean(value, na.rm = TRUE),
            SD = sd(value, na.rm = TRUE)) %>%
  mutate(variable = case_when(variable == "self_relevance" ~ "self-relevance",
                              variable == "social_relevance" ~ "social relevance",
                              TRUE ~ "sharing intention")) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
variable M SD
self-relevance 2.57 1.02
social relevance 2.67 0.96
sharing intention 2.62 1.02

ROI activity

# mean and SD of standardized ROI activity, per ROI
merged_wide %>%
  gather(variable, value, mentalizing, self_referential) %>%
  group_by(variable) %>%
  summarize(M = mean(value, na.rm = TRUE),
            SD = sd(value, na.rm = TRUE)) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
variable M SD
mentalizing 0.37 1.10
self_referential 0.14 1.11

ROI correlations

Correlation between self-referential and mentalizing ROIs. Given the high correlations, we also report sensitivity analyses with alternative, less highly correlated ROIs. Note, we do not include both ROIs in the same model, so multicollinearity is not an issue.

# repeated-measures (within-participant) correlation between the two ROIs
# NOTE(review): uses the raw (unstandardized) parameter estimates
merged %>%
  select(pID, trial, cond, atlas, parameter_estimate) %>%
  spread(atlas, parameter_estimate) %>%
  rmcorr::rmcorr(as.factor(pID), mentalizing, `self-referential`, data = .)
## 
## Repeated measures correlation
## 
## r
## 0.9358986
## 
## degrees of freedom
## 5934
## 
## p-value
## 0
## 
## 95% confidence interval
## 0.9326641 0.9389826

replication analyses

H1

Is greater activity in the ROIs associated with higher self and social relevance ratings?

self-referential ROI

✅ H1a: Greater activity in the self-referential ROI will be associated with higher self-relevance ratings

# H1a: predict self-relevance from self-referential ROI activity,
# with by-participant random intercepts and slopes; bobyqa aids convergence
mod_h1a =  lmer(self_relevance ~ self_referential + (1 + self_referential | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h1a)
term b [95% CI] df t p
intercept 2.56 [2.48, 2.64] 84.12 66.03 < .001
self-referential 0.05 [0.02, 0.07] 83.10 3.80 < .001

summary

summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16768.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4386 -0.7017  0.1439  0.6852  2.3596 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.114314 0.33810       
##           self_referential 0.001236 0.03516  -0.88
##  Residual                  0.917267 0.95774       
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       2.55935    0.03876 84.11828  66.032 < 0.0000000000000002 ***
## self_referential  0.04878    0.01283 83.10239   3.801             0.000274 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.295

mentalizing ROI

✅ H1b: Greater activity in the mentalizing ROI will be associated with higher social relevance ratings

# H1b: predict social relevance from mentalizing ROI activity,
# with by-participant random intercepts and slopes
mod_h1b = lmer(social_relevance ~ mentalizing + (1 + mentalizing | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h1b)
term b [95% CI] df t p
intercept 2.66 [2.57, 2.74] 84.56 63.81 < .001
mentalizing 0.05 [0.02, 0.07] 83.46 4.04 < .001

summary

summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15851.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8288 -0.7220  0.1692  0.6497  2.6824 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.134298 0.36647       
##           mentalizing 0.001593 0.03992  -0.11
##  Residual             0.783116 0.88494       
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.65545    0.04161 84.56216  63.811 < 0.0000000000000002 ***
## mentalizing  0.04920    0.01218 83.46286   4.039             0.000119 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.132

combined plot

# model-implied predictions across the plotted range of ROI activity (SD units)
predicted = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# trial-level data in long form for per-participant fit lines;
# keep only the ROI-rating pairings tested in H1a/H1b
ind_data = merged_wide %>%
  select(pID, trial, contains("relevance"), mentalizing, self_referential) %>%
  rename("self-referential" = self_referential) %>%
  gather(variable, predicted, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable)) %>%
  gather(roi, x, mentalizing, `self-referential`) %>%
  filter(!(variable == "self-relevance" & roi == "mentalizing") & ! (variable == "social relevance" & roi == "self-referential"))

# fixed-effect line + 95% CI ribbon over thin per-participant OLS fits
(plot_h1 = predicted %>%
  ggplot(aes(x, predicted)) +
  stat_smooth(data = ind_data, aes(group = pID, color = roi), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = roi), alpha = .3, color = NA) +
  geom_line(aes(color = roi), size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_roi, guide = FALSE) +
  scale_fill_manual(name = "", values = palette_roi, guide = FALSE) +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes  +
  theme(legend.position = "top",
        legend.key.width=unit(2,"cm")))

H2

Do the manipulations increase relevance?

self-relevance

❌ H2a: Self-focused intervention (compared to control) will increase self-relevance

# H2a: effect of condition on self-relevance; random intercepts by participant
mod_h2a = lmer(self_relevance ~ cond + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h2a)
term b [95% CI] df t p
intercept 2.55 [2.47, 2.64] 122.65 60.68 < .001
other 0.01 [-0.05, 0.07] 5933.24 0.23 .821
self 0.03 [-0.03, 0.09] 5933.33 1.10 .271

summary

summary(mod_h2a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ cond + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16792.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4264 -0.7090  0.1525  0.6718  2.3520 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1116   0.3340  
##  Residual             0.9209   0.9596  
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                Estimate  Std. Error          df t value            Pr(>|t|)    
## (Intercept)    2.553956    0.042086  122.645765  60.684 <0.0000000000000002 ***
## condother      0.006864    0.030292 5933.237900   0.227               0.821    
## condself       0.033354    0.030300 5933.334972   1.101               0.271    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.360       
## condself  -0.360  0.500

social relevance

❌ H2b: Other-focused intervention (compared to control) will increase social relevance

# H2b: effect of condition on social relevance; random intercepts by participant
mod_h2b = lmer(social_relevance ~ cond + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h2b)
term b [95% CI] df t p
intercept 2.64 [2.56, 2.73] 112.01 59.87 < .001
other 0.05 [-0.01, 0.10] 5933.20 1.64 .102
self 0.05 [-0.01, 0.10] 5933.27 1.63 .102

summary

summary(mod_h2b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ cond + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15871.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7789 -0.7182  0.1773  0.6546  2.6794 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1324   0.3639  
##  Residual             0.7869   0.8871  
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##               Estimate Std. Error         df t value            Pr(>|t|)    
## (Intercept)    2.64369    0.04416  112.00676  59.873 <0.0000000000000002 ***
## condother      0.04584    0.02800 5933.19739   1.637               0.102    
## condself       0.04576    0.02801 5933.26965   1.634               0.102    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.317       
## condself  -0.317  0.500

combined plot

# model-implied condition means (with CIs) for both H2 models
predicted_h2 = ggeffects::ggpredict(mod_h2a, c("cond")) %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2b, c("cond")) %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# observed trial-level ratings reshaped to match the prediction columns
ind_data_h2 = merged_wide %>%
  rename("x" = cond) %>%
  gather(model, predicted, self_relevance, social_relevance) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         model = gsub("self_relevance", "self-relevance", model),
         model = gsub("social_relevance", "social relevance", model))
  
# per-participant mean lines under model-implied condition means with CIs
(plot_h2 = predicted_h2 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = ind_data_h2, aes(group = pID), fun = "mean", geom = "line",
               size = .1, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = .75) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H3

Is greater self and social relevance associated with higher sharing intentions?

✅ H3a: Greater self-relevance ratings will be associated with higher sharing intentions

✅ H3b: Greater social relevance ratings will be associated with higher sharing intentions

# H3: predict sharing intention (`value`) from both relevance ratings,
# with by-participant random intercepts and slopes for both predictors
mod_h3 = lmer(value ~ self_relevance + social_relevance + (1 + self_relevance + social_relevance | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# model-implied sharing intention across each relevance rating (other held constant)
predicted = ggeffects::ggpredict(mod_h3, c("self_relevance")) %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3, c("social_relevance")) %>%
              data.frame() %>%
              mutate(variable = "social relevance"))

# observed trial-level points reshaped to match the prediction columns
points = merged_wide %>%
  rename("self-referential" = self_referential,
         "predicted" = value) %>%
  gather(variable, x, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable))

# fixed-effect line + CI ribbon over thin per-participant OLS fits
(plot_rel_sharing = predicted %>%
  ggplot(aes(x, predicted)) +
  stat_smooth(data = points, aes(group = pID, color = variable),
              geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = variable), alpha = .2, color = NA) +
  geom_line(aes(color = variable), size = 1.5) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_dv[1:2]) +
  scale_fill_manual(name = "", values = palette_dv[1:2]) +
  labs(x = "\nrelevance rating", y = "predicted sharing intention rating\n") +
  plot_aes +
    theme(legend.position = "none"))

model table

table_model(mod_h3)
term b [95% CI] df t p
intercept 1.18 [1.05, 1.30] 76.54 18.77 < .001
self-relevance 0.30 [0.27, 0.34] 85.52 15.64 < .001
social relevance 0.25 [0.20, 0.30] 82.46 9.76 < .001

summary

summary(mod_h3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_relevance + social_relevance + (1 + self_relevance +  
##     social_relevance | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14916
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3658 -0.7056  0.0604  0.6922  3.0488 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr       
##  pID      (Intercept)      0.21401  0.4626              
##           self_relevance   0.01171  0.1082   -0.22      
##           social_relevance 0.03214  0.1793   -0.59 -0.56
##  Residual                  0.68629  0.8284              
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       1.17695    0.06269 76.54105   18.77 < 0.0000000000000002 ***
## self_relevance    0.30493    0.01950 85.52198   15.64 < 0.0000000000000002 ***
## social_relevance  0.25191    0.02581 82.45605    9.76  0.00000000000000211 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rl
## self_relvnc -0.234       
## socil_rlvnc -0.558 -0.600

preregistered analyses

Link to the preregistration

Deviations:

  • removed the condition slope as a random effect from the H5 models because those models did not converge with it included


H4

Do the manipulations increase neural activity in brain regions associated with self-referential processing and mentalizing?

self-referential ROI

✅ H4a: Self-focused intervention (compared to control) will increase brain activity in ROIs related to self-referential processes.

# H4a: effect of condition on self-referential ROI activity,
# with random condition slopes by participant (cf. H5 deviation note)
mod_h4a = lmer(self_referential ~ cond + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h4a)
term b [95% CI] df t p
intercept 0.08 [-0.04, 0.19] 84.06 1.35 .181
other 0.09 [0.01, 0.17] 83.88 2.31 .023
self 0.09 [0.01, 0.18] 83.79 2.16 .033

summary

summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17301.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8115 -0.6590  0.0013  0.6483  3.5571 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.22970  0.4793              
##           condother   0.04266  0.2065   -0.16      
##           condself    0.07394  0.2719   -0.06  0.58
##  Residual             0.97976  0.9898              
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)  
## (Intercept)  0.07618    0.05649 84.06499   1.349   0.1811  
## condother    0.08879    0.03845 83.88106   2.309   0.0234 *
## condself     0.09304    0.04298 83.79299   2.165   0.0333 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.309       
## condself  -0.239  0.528

mentalizing ROI

✅❌ H4b: Other-focused intervention (compared to control) will increase brain activity in ROIs related to mentalizing processes.

The other condition is associated with increased activation in the mentalizing ROI. However, when condition is allowed to vary randomly across people, the relationship is not statistically significant.

# H4b: effect of condition on mentalizing ROI activity,
# with random condition slopes by participant
mod_h4b = lmer(mentalizing ~ cond + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h4b)
term b [95% CI] df t p
intercept 0.32 [0.21, 0.43] 84.08 5.84 < .001
other 0.06 [-0.01, 0.14] 83.66 1.71 .092
self 0.08 [-0.00, 0.16] 83.81 1.89 .063

summary

summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17305.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6435 -0.6587  0.0155  0.6742  3.3370 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.21636  0.4651              
##           condother   0.03522  0.1877   -0.16      
##           condself    0.06873  0.2622   -0.03  0.61
##  Residual             0.98266  0.9913              
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value     Pr(>|t|)    
## (Intercept)  0.32178    0.05509 84.08020   5.841 0.0000000952 ***
## condother    0.06373    0.03733 83.66351   1.707       0.0915 .  
## condself     0.07981    0.04230 83.81192   1.887       0.0626 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.315       
## condself  -0.232  0.534

combined plot

# model-implied condition means (with CIs) for both H4 models
predicted_h4 = ggeffects::ggpredict(mod_h4a, c("cond")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# observed trial-level ROI activity (long format retained from `merged`)
ind_data_h4 = merged %>%
  select(pID, cond, run, trial, atlas, parameter_estimate_std) %>%
  unique() %>%
  rename("x" = cond,
         "predicted" = parameter_estimate_std) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# per-participant mean lines under model-implied condition means with CIs
(plot_h4 = predicted_h4 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = ind_data_h4, aes(group = pID), fun = "mean", geom = "line",
               size = .1, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = .75) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H5

Do the manipulations increase sharing intentions?

❌ H5a: Self-focused intervention (compared to control) will increase sharing intentions

❌ H5b: Other-focused intervention (compared to control) will increase sharing intentions

# H5: effect of condition on sharing intention; random intercepts only
# (condition slope removed per preregistration deviation — did not converge)
mod_h5 = lmer(value ~ cond + (1 | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# model-implied condition means (with CIs) for sharing intention
predicted_h5 = ggeffects::ggpredict(mod_h5, c("cond")) %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# observed trial-level sharing intentions, columns matched to predictions
ind_data_h5 = merged_wide %>%
  rename("x" = cond,
         "predicted" = value) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))
  
# per-participant mean lines under model-implied condition means with CIs
predicted_h5 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = ind_data_h5, aes(group = pID), fun = "mean", geom = "line",
               size = .25, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1.5) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = 1.5) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "predicted sharing intention\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

model table

table_model(mod_h5)
term b [95% CI] df t p
intercept 2.65 [2.56, 2.73] 126.07 63.74 < .001
other -0.03 [-0.09, 0.03] 5854.47 -1.04 .300
self -0.04 [-0.11, 0.02] 5854.57 -1.46 .144

summary

summary(mod_h5)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ cond + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16689.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5567 -0.7088  0.1152  0.7262  2.0380 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1061   0.3257  
##  Residual             0.9399   0.9695  
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##               Estimate Std. Error         df t value            Pr(>|t|)    
## (Intercept)    2.64561    0.04151  126.07314  63.742 <0.0000000000000002 ***
## condother     -0.03194    0.03080 5854.47426  -1.037               0.300    
## condself      -0.04498    0.03082 5854.56882  -1.459               0.144    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.371       
## condself  -0.371  0.499

H6

Is ROI activity positively related to sharing intentions?

self-referential ROI

✅ H6a: Stronger activity in the self-referential ROI will be related to higher sharing intentions.

# H6a: predict sharing intention from self-referential ROI activity,
# with by-participant random intercepts and slopes
mod_h6a = lmer(value ~ self_referential + (1 + self_referential | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h6a)
term b [95% CI] df t p
intercept 2.61 [2.53, 2.68] 84.40 68.80 < .001
self-referential 0.08 [0.06, 0.11] 81.88 6.09 < .001

summary

summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16642.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5959 -0.7255  0.1154  0.7346  2.2520 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.107975 0.32860       
##           self_referential 0.002403 0.04902  -0.22
##  Residual                  0.930586 0.96467       
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       2.60734    0.03790 84.40186  68.798 < 0.0000000000000002 ***
## self_referential  0.08259    0.01355 81.88399   6.094         0.0000000343 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.124

mentalizing ROI

✅ H6b: Stronger activation in the mentalizing ROI will be related to higher sharing intentions.

# H6b: predict sharing intention from mentalizing ROI activity,
# with by-participant random intercepts and slopes
mod_h6b = lmer(value ~ mentalizing + (1 + mentalizing | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h6b)
term b [95% CI] df t p
intercept 2.59 [2.52, 2.67] 85.42 67.99 < .001
mentalizing 0.07 [0.05, 0.10] 81.95 5.46 < .001

summary

summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16653.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5834 -0.7234  0.1182  0.7365  2.2021 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.10793  0.32853       
##           mentalizing 0.00191  0.04371  -0.11
##  Residual             0.93265  0.96574       
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.59231    0.03813 85.41773   67.99 < 0.0000000000000002 ***
## mentalizing  0.07292    0.01336 81.95114    5.46          0.000000498 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.150

combined plot

# Grid of standardized ROI activity values for model predictions;
# `vals` is looked up by name inside the ggpredict() term strings below
vals = seq(-4.5, 4.5, .1)

# Predictions from each ROI model, labeled and stacked into one data frame
predictions_self = ggeffects::ggpredict(mod_h6a, c("self_referential [vals]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential")

predictions_ment = ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]")) %>%
  data.frame() %>%
  mutate(roi = "mentalizing")

predicted_h6 = bind_rows(predictions_self, predictions_ment) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# Trial-level data for per-participant slopes, with columns renamed to
# match the prediction data frame (x = ROI activity, predicted = sharing)
ind_data_h6 = merged %>%
  select(pID, cond, run, trial, atlas, parameter_estimate_std, value) %>%
  rename(x = parameter_estimate_std,
         predicted = value,
         roi = atlas) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# Plot model-predicted sharing intention as a function of ROI activity:
# faint per-participant OLS fits underneath the group-level prediction
# line and its 95% CI ribbon, faceted by ROI
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = roi, fill = roi)) +
  stat_smooth(data = ind_data_h6, aes(group = pID), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~roi) +
  scale_color_manual(name = "", values = palette_roi) +
  scale_fill_manual(name = "", values = palette_roi) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "none")

H7

Is there an indirect effect of the condition on sharing intentions through activity in self-referential and mentalizing ROIs?

prep data

# source the multilevel mediation bootstrap functions (indirect.mlm et al.)
source("indirectMLM.R")

# create self condition dataframe: self vs. control trials,
# self-referential ROI activity as the mediator; cond dummy-coded (self = 1)
data_med_self = merged %>%
  filter(cond != "other" & atlas == "self-referential") %>%
  mutate(cond = ifelse(cond == "self", 1, 0)) %>%
  select(pID, site, trial, cond, value, parameter_estimate) %>%
  data.frame()

# create social condition dataframe: other vs. control trials,
# mentalizing ROI activity as the mediator; cond dummy-coded (other = 1)
data_med_other = merged %>%
  filter(cond != "self" & atlas == "mentalizing") %>%
  mutate(cond = ifelse(cond == "other", 1, 0)) %>%
  select(pID, site, trial, cond, value, parameter_estimate) %>%
  data.frame()

# define variables for the mediation models
y_var = "value"               # outcome: sharing intention
m_var = "parameter_estimate"  # mediator: ROI activity

self condition

✅ H7a: The effect of Self-focused intervention on sharing intention is mediated by increased activity in the self-referential ROI.

# Bootstrapped multilevel mediation: self condition -> self-referential ROI -> sharing
model_name = "mediation_self"
data = data_med_self

# Load cached bootstrap results if present; otherwise run the (slow)
# bootstrap with R = 500 resamples and cache the result to disk.
if (file.exists(sprintf("models/model_%s.RDS", model_name))) {
  # assign() takes the target name directly; get("model_name") was redundant
  assign(model_name, readRDS(sprintf("models/model_%s.RDS", model_name)))
} else {
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = m_var, group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  # get() is the safe, idiomatic replacement for eval(parse(text = ...))
  saveRDS(get(model_name), sprintf("models/model_%s.RDS", model_name))
}

indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0.001 [-0.003, 0.01]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.006 [0.001, 0.019]
## Biased Estimate of Within-subjects Indirect Effect: 0.005 [0, 0.013]
## Bias in Within-subjects Indirect Effect: 0.001 [0, 0.01]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.047 [-0.117, 0.012]
## Biased Total Effect of X on Y (c path): -0.044 [-0.114, 0.014]
## Bias in Total Effect: 0.002 [0, 0.007]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.053 [-0.122, 0.007]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.03 [0.002, 0.064]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.173 [0.122, 0.261]

other condition

❌ H7b: The effect of Other-focused intervention on sharing intention is mediated by increased activity in the mentalizing ROI.

# Bootstrapped multilevel mediation: other condition -> mentalizing ROI -> sharing
model_name = "mediation_other"
data = data_med_other

# Load cached bootstrap results if present; otherwise run the (slow)
# bootstrap with R = 500 resamples and cache the result to disk.
if (file.exists(sprintf("models/model_%s.RDS", model_name))) {
  # assign() takes the target name directly; get("model_name") was redundant
  assign(model_name, readRDS(sprintf("models/model_%s.RDS", model_name)))
} else {
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = m_var, group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  # get() is the safe, idiomatic replacement for eval(parse(text = ...))
  saveRDS(get(model_name), sprintf("models/model_%s.RDS", model_name))
}

indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0 [-0.004, 0.006]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.003 [-0.002, 0.013]
## Biased Estimate of Within-subjects Indirect Effect: 0.003 [-0.001, 0.01]
## Bias in Within-subjects Indirect Effect: 0 [0, 0.006]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.031 [-0.089, 0.038]
## Biased Total Effect of X on Y (c path): -0.032 [-0.09, 0.039]
## Bias in Total Effect: 0.001 [0, 0.005]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.034 [-0.093, 0.033]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.018 [-0.005, 0.042]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.193 [0.145, 0.328]

exploratory moderation by cultural context

These analyses explore whether the analyses reported in study 1 of the main manuscript are moderated by cultural context (the Netherlands or the USA).

H1

Are the relationships between ROI activity and self and social relevance ratings moderated by cultural context?

self-referential ROI

These data are not consistent with moderation by cultural context.

# H1a moderation: does site (Netherlands vs. USA) moderate the relationship
# between self-referential ROI activity and self-relevance ratings?
mod_h1a =  lmer(self_relevance ~ self_referential * site + (1 + self_referential | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h1a = table_model(mod_h1a, print = FALSE)

kableExtra::kable_styling(kable(table_h1a))
term b [95% CI] df t p
intercept 2.61 [2.50, 2.72] 82.71 46.34 < .001
self-referential 0.04 [0.01, 0.08] 84.72 2.36 .021
sample (USA) -0.09 [-0.25, 0.06] 83.71 -1.20 .233
self-referential x sample (USA) 0.01 [-0.04, 0.06] 83.24 0.37 .710

simple slopes

simple_slopes(mod_h1a, "self_referential", "site")
site b [95% CI]
Netherlands 0.04 [0.01, 0.08]
USA 0.05 [0.02, 0.09]

summary

summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential * site + (1 + self_referential |  
##     pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16775.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4417 -0.7005  0.1457  0.6815  2.3629 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.113632 0.33709       
##           self_referential 0.001393 0.03732  -0.82
##  Residual                  0.917256 0.95773       
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                           Estimate Std. Error        df t value
## (Intercept)               2.608308   0.056284 82.710736  46.342
## self_referential          0.044628   0.018937 84.719417   2.357
## siteUSA                  -0.093299   0.077596 83.705009  -1.202
## self_referential:siteUSA  0.009665   0.025929 83.241541   0.373
##                                     Pr(>|t|)    
## (Intercept)              <0.0000000000000002 ***
## self_referential                      0.0207 *  
## siteUSA                               0.2326    
## self_referential:siteUSA              0.7103    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf sitUSA
## self_rfrntl -0.220              
## siteUSA     -0.725  0.160       
## slf_rfr:USA  0.161 -0.730 -0.281

mentalizing ROI

These data are not consistent with moderation by cultural context.

# H1b moderation: does site moderate the relationship between mentalizing
# ROI activity and social relevance ratings?
mod_h1b = lmer(social_relevance ~ mentalizing * site + (1 + mentalizing | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h1b = table_model(mod_h1b, print = FALSE)

kableExtra::kable_styling(kable(table_h1b))
term b [95% CI] df t p
intercept 2.72 [2.61, 2.84] 81.96 45.54 < .001
mentalizing 0.05 [0.01, 0.08] 83.43 2.63 .010
sample (USA) -0.13 [-0.30, 0.03] 83.52 -1.59 .116
mentalizing x sample (USA) 0.01 [-0.04, 0.05] 82.93 0.22 .824

simple slopes

simple_slopes(mod_h1b, "mentalizing", "site")
site b [95% CI]
Netherlands 0.05 [0.01, 0.08]
USA 0.05 [0.02, 0.09]

summary

summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing * site + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15857.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8341 -0.7202  0.1659  0.6493  2.6852 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.131518 0.36265       
##           mentalizing 0.001772 0.04209  -0.09
##  Residual             0.783092 0.88493       
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                      Estimate Std. Error        df t value            Pr(>|t|)
## (Intercept)          2.724241   0.059821 81.961889  45.540 <0.0000000000000002
## mentalizing          0.047136   0.017922 83.433282   2.630              0.0102
## siteUSA             -0.131330   0.082626 83.520316  -1.589              0.1157
## mentalizing:siteUSA  0.005497   0.024603 82.926189   0.223              0.8238
##                        
## (Intercept)         ***
## mentalizing         *  
## siteUSA                
## mentalizing:siteUSA    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn sitUSA
## mentalizing -0.079              
## siteUSA     -0.724  0.057       
## mntlzng:USA  0.058 -0.728 -0.122

combined plot

# Model predictions across the activity range for each ROI model, by site,
# labeled with the ROI and rating variable before stacking
pred_h1a = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]", "site")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance")

pred_h1b = ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]", "site")) %>%
  data.frame() %>%
  mutate(roi = "mentalizing",
         variable = "social relevance")

predicted = bind_rows(pred_h1a, pred_h1b)

# Reshape trial-level data to long format so each row pairs one rating
# (self- or social relevance) with activity in one ROI; the final filter
# keeps only the hypothesis-matched rating/ROI pairings
ind_data = merged_wide %>%
  select(site, pID, trial, contains("relevance"), mentalizing, self_referential) %>%
  rename("self-referential" = self_referential,
         "group" = site) %>%
  gather(variable, predicted, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable)) %>%
  gather(roi, x, mentalizing, `self-referential`) %>%
  filter(!(variable == "self-relevance" & roi == "mentalizing") & ! (variable == "social relevance" & roi == "self-referential"))

# Plot predicted ratings by ROI activity and site: faint per-participant
# fits under the group-level prediction line with 95% CI ribbon, faceted
# by rating variable; outer parentheses print the plot on assignment
(plot_h1 = predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = ind_data, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .3, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_sample) +
  scale_fill_manual(name = "", values = palette_sample) +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = "top",
        legend.key.width=unit(2,"cm")))

H2

Are the effects of the experimental manipulations on relevance moderated by cultural context?

self-relevance

These data are not consistent with moderation by cultural context.

# H2a moderation: does site moderate the effect of condition on
# self-relevance ratings? (random intercepts only)
mod_h2a = lmer(self_relevance ~ cond * site + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h2a = table_model(mod_h2a, print = FALSE)

kableExtra::kable_styling(kable(table_h2a))
term b [95% CI] df t p
intercept 2.58 [2.46, 2.70] 121.05 42.02 < .001
other 0.04 [-0.05, 0.12] 5931.32 0.86 .389
self 0.04 [-0.05, 0.13] 5931.19 0.91 .364
sample (USA) -0.05 [-0.21, 0.12] 121.12 -0.57 .571
other x sample (USA) -0.06 [-0.18, 0.06] 5931.25 -0.97 .333
self x sample (USA) -0.01 [-0.13, 0.11] 5931.32 -0.21 .834

simple slopes

simple_slopes(mod_h2a, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.04 [-0.05, 0.12]
other - control USA -0.02 [-0.10, 0.06]
self - control Netherlands 0.04 [-0.05, 0.13]
self - control USA 0.03 [-0.05, 0.11]

summary

summary(mod_h2a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ cond * site + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16801.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4242 -0.7136  0.1580  0.6769  2.3285 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1118   0.3343  
##  Residual             0.9210   0.9597  
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                     Estimate Std. Error         df t value            Pr(>|t|)
## (Intercept)          2.57933    0.06138  121.05084  42.020 <0.0000000000000002
## condother            0.03800    0.04416 5931.32013   0.861               0.389
## condself             0.04007    0.04412 5931.19490   0.908               0.364
## siteUSA             -0.04792    0.08438  121.12306  -0.568               0.571
## condother:siteUSA   -0.05881    0.06069 5931.25137  -0.969               0.333
## condself:siteUSA    -0.01269    0.06070 5931.32369  -0.209               0.834
##                      
## (Intercept)       ***
## condother            
## condself             
## siteUSA              
## condother:siteUSA    
## condself:siteUSA     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.359                              
## condself    -0.360  0.500                       
## siteUSA     -0.727  0.261  0.262                
## cndthr:sUSA  0.261 -0.728 -0.364 -0.359         
## cndslf:sUSA  0.261 -0.363 -0.727 -0.359  0.500

social relevance

These data are not consistent with moderation by cultural context.

# H2b moderation: does site moderate the effect of condition on
# social relevance ratings? (random intercepts only)
mod_h2b = lmer(social_relevance ~ cond * site + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h2b = table_model(mod_h2b, print = FALSE)

kableExtra::kable_styling(kable(table_h2b))
term b [95% CI] df t p
intercept 2.73 [2.60, 2.85] 110.92 42.55 < .001
other 0.02 [-0.06, 0.10] 5931.27 0.42 .678
self 0.00 [-0.08, 0.08] 5931.17 0.03 .978
sample (USA) -0.16 [-0.33, 0.02] 110.97 -1.78 .077
other x sample (USA) 0.05 [-0.06, 0.16] 5931.21 0.97 .330
self x sample (USA) 0.08 [-0.03, 0.19] 5931.27 1.50 .132

simple slopes

simple_slopes(mod_h2b, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.02 [-0.06, 0.10]
other - control USA 0.07 [-0.00, 0.15]
self - control Netherlands 0.00 [-0.08, 0.08]
self - control USA 0.09 [0.01, 0.16]

summary

summary(mod_h2b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ cond * site + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15878.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7607 -0.7258  0.1765  0.6453  2.7077 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1310   0.3619  
##  Residual             0.7869   0.8871  
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                     Estimate Std. Error         df t value            Pr(>|t|)
## (Intercept)          2.72683    0.06408  110.92203  42.553 <0.0000000000000002
## condother            0.01697    0.04081 5931.26544   0.416              0.6776
## condself             0.00114    0.04078 5931.17089   0.028              0.9777
## siteUSA             -0.15710    0.08808  110.97463  -1.784              0.0772
## condother:siteUSA    0.05460    0.05610 5931.21374   0.973              0.3305
## condself:siteUSA     0.08443    0.05611 5931.26816   1.505              0.1324
##                      
## (Intercept)       ***
## condother            
## condself             
## siteUSA           .  
## condother:siteUSA    
## condself:siteUSA     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.318                              
## condself    -0.318  0.500                       
## siteUSA     -0.728  0.231  0.232                
## cndthr:sUSA  0.231 -0.728 -0.364 -0.318         
## cndslf:sUSA  0.231 -0.363 -0.727 -0.318  0.500

combined plot

# Model-predicted ratings by condition and site for both H2 models,
# stacked and labeled; condition ordered self -> control -> other
predicted_h2 = ggeffects::ggpredict(mod_h2a, c("cond", "site")) %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2b, c("cond", "site")) %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Trial-level ratings in long format, renamed to match predicted_h2 columns
ind_data_h2 = merged_wide %>%
  rename("x" = cond,
         "group" = site) %>%
  gather(model, predicted, self_relevance, social_relevance) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         model = gsub("self_relevance", "self-relevance", model),
         model = gsub("social_relevance", "social relevance", model))
  
# Plot predicted ratings by condition and site: faint per-participant mean
# lines under group means with pointranges (95% CI), faceted by rating;
# outer parentheses print the plot on assignment
(plot_h2 = predicted_h2 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = ind_data_h2, aes(group = pID), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_sample) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H3

Are the relationships between self and social relevance and sharing intentions moderated by cultural context?

These data are not consistent with moderation by cultural context.

# H3 moderation: does site moderate the relationships between self/social
# relevance ratings and sharing intentions? Random slopes for both ratings.
mod_h3 = lmer(value ~ self_relevance * site + social_relevance * site + (1 + self_relevance + social_relevance | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# Predictions from the single H3 model for each rating predictor, by site
predicted = ggeffects::ggpredict(mod_h3, c("self_relevance", "site")) %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3, c("social_relevance", "site")) %>%
              data.frame() %>%
              mutate(variable = "social relevance"))

# Trial-level data in long format (one row per rating type per trial),
# renamed to match the prediction data frame's columns
points = merged_wide %>%
  rename("self-referential" = self_referential,
         "predicted" = value,
         "group" = site) %>%
  gather(variable, x, contains("relevance")) %>%
  mutate(variable = gsub("self_relevance", "self-relevance", variable),
         variable = gsub("social_relevance", "social relevance", variable))

# Plot predicted sharing intention by rating and site: faint per-participant
# OLS fits under the group-level prediction line with 95% CI ribbon,
# faceted by rating variable; outer parentheses print on assignment
(plot_rel_sharing = predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = points, aes(group = interaction(pID, group)), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_sample) +
  scale_fill_manual(name = "", values = palette_sample) +
  labs(x = "\nrating", y = "predicted sharing intention\n") +
  plot_aes)

model table

# Tidy the fixed effects without printing, then render a styled table
table_h3 = table_model(mod_h3, print = FALSE)

kableExtra::kable_styling(kable(table_h3))
term b [95% CI] df t p
intercept 1.14 [0.95, 1.32] 83.43 12.02 < .001
self-relevance 0.32 [0.27, 0.38] 89.44 11.06 < .001
sample (USA) 0.08 [-0.18, 0.33] 77.04 0.60 .550
social relevance 0.23 [0.15, 0.31] 88.51 5.96 < .001
self-relevance x sample (USA) -0.03 [-0.11, 0.04] 84.57 -0.89 .378
sample (USA) x social relevance 0.04 [-0.06, 0.14] 82.52 0.79 .430

simple slopes

self-relevance

simple_slopes(mod_h3, "self_relevance", "site", continuous = TRUE)
site b [95% CI]
Netherlands 0.32 [0.27, 0.38]
USA 0.29 [0.24, 0.34]

social relevance

simple_slopes(mod_h3, "social_relevance", "site", continuous = TRUE)
site b [95% CI]
Netherlands 0.23 [0.15, 0.31]
USA 0.27 [0.20, 0.34]

summary

summary(mod_h3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_relevance * site + social_relevance * site + (1 +  
##     self_relevance + social_relevance | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14925.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3729 -0.6972  0.0581  0.6930  3.0510 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr       
##  pID      (Intercept)      0.21606  0.4648              
##           self_relevance   0.01161  0.1078   -0.21      
##           social_relevance 0.03234  0.1798   -0.61 -0.55
##  Residual                  0.68639  0.8285              
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##                          Estimate Std. Error       df t value
## (Intercept)               1.13563    0.09446 83.42815  12.022
## self_relevance            0.32373    0.02928 89.43861  11.055
## siteUSA                   0.07610    0.12659 77.03584   0.601
## social_relevance          0.23012    0.03864 88.51414   5.955
## self_relevance:siteUSA   -0.03477    0.03921 84.56900  -0.887
## siteUSA:social_relevance  0.04122    0.05201 82.52313   0.793
##                                      Pr(>|t|)    
## (Intercept)              < 0.0000000000000002 ***
## self_relevance           < 0.0000000000000002 ***
## siteUSA                                 0.550    
## social_relevance                 0.0000000515 ***
## self_relevance:siteUSA                  0.378    
## siteUSA:social_relevance                0.430    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rl sitUSA scl_rl s_:USA
## self_relvnc -0.229                            
## siteUSA     -0.746  0.171                     
## socil_rlvnc -0.574 -0.594  0.428              
## slf_rlv:USA  0.171 -0.747 -0.229  0.444       
## stUSA:scl_r  0.426  0.441 -0.568 -0.743 -0.597

H4

Are the effects of the experimental manipulations on ROI activity moderated by cultural context?

self-referential ROI

There is a main effect of site, such that the Philadelphia cohort has greater activity in the self-referential ROI compared to the Amsterdam cohort.

These data are not consistent with moderation by cultural context.

# H4a moderation: does site moderate the effect of condition on
# self-referential ROI activity? (random condition slopes by participant)
mod_h4a = lmer(self_referential ~ cond * site + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h4a = table_model(mod_h4a, print = FALSE)

kableExtra::kable_styling(kable(table_h4a))
term b [95% CI] df t p
intercept -0.15 [-0.30, -0.00] 83.01 -2.03 .045
other 0.11 [-0.00, 0.22] 82.88 1.98 .051
self 0.09 [-0.04, 0.21] 82.59 1.39 .167
sample (USA) 0.43 [0.23, 0.64] 83.05 4.18 < .001
other x sample (USA) -0.04 [-0.20, 0.11] 82.88 -0.56 .579
self x sample (USA) 0.01 [-0.16, 0.18] 82.77 0.11 .911

simple slopes

simple_slopes(mod_h4a, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.11 [0.00, 0.22]
other - control USA 0.07 [-0.04, 0.17]
self - control Netherlands 0.09 [-0.04, 0.21]
self - control USA 0.10 [-0.02, 0.21]

summary

summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond * site + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17293.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8153 -0.6531  0.0029  0.6447  3.5689 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.18513  0.4303              
##           condother   0.04377  0.2092   -0.13      
##           condself    0.07588  0.2755   -0.08  0.58
##  Residual             0.97976  0.9898              
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                    Estimate Std. Error        df t value  Pr(>|t|)    
## (Intercept)       -0.152944   0.075261 83.008146  -2.032    0.0453 *  
## condother          0.111564   0.056293 82.877507   1.982    0.0508 .  
## condself           0.087847   0.062993 82.585475   1.395    0.1669    
## siteUSA            0.432905   0.103449 83.045331   4.185 0.0000706 ***
## condother:siteUSA -0.043128   0.077376 82.884390  -0.557    0.5788    
## condself:siteUSA   0.009734   0.086633 82.774269   0.112    0.9108    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.312                              
## condself    -0.269  0.530                       
## siteUSA     -0.728  0.227  0.196                
## cndthr:sUSA  0.227 -0.728 -0.385 -0.312         
## cndslf:sUSA  0.196 -0.385 -0.727 -0.269  0.530

mentalizing ROI

There is a main effect of site, such that the Philadelphia cohort has greater activity in the mentalizing ROI compared to the Amsterdam cohort.

These data are not consistent with moderation by cultural context.

# H4b moderation: does site moderate the effect of condition on
# mentalizing ROI activity? (random condition slopes by participant)
mod_h4b = lmer(mentalizing ~ cond * site + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h4b = table_model(mod_h4b, print = FALSE)

kableExtra::kable_styling(kable(table_h4b))
term b [95% CI] df t p
intercept 0.11 [-0.04, 0.26] 83.08 1.51 .134
other 0.12 [0.01, 0.23] 82.72 2.21 .030
self 0.08 [-0.04, 0.21] 82.59 1.32 .190
sample (USA) 0.40 [0.19, 0.60] 83.12 3.87 < .001
other x sample (USA) -0.11 [-0.25, 0.04] 82.72 -1.41 .161
self x sample (USA) -0.00 [-0.17, 0.17] 82.79 -0.05 .963

simple slopes

simple_slopes(mod_h4b, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.12 [0.01, 0.23]
other - control USA 0.01 [-0.09, 0.11]
self - control Netherlands 0.08 [-0.04, 0.20]
self - control USA 0.08 [-0.04, 0.19]

summary

summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond * site + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17299.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6805 -0.6558  0.0178  0.6712  3.3474 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.17960  0.4238              
##           condother   0.03393  0.1842   -0.05      
##           condself    0.07062  0.2657   -0.04  0.63
##  Residual             0.98265  0.9913              
## Number of obs: 6020, groups:  pID, 85
## 
## Fixed effects:
##                    Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)        0.112513   0.074358 83.078809   1.513 0.134046    
## condother          0.119371   0.054119 82.715780   2.206 0.030179 *  
## condself           0.081894   0.061989 82.593094   1.321 0.190120    
## siteUSA            0.395417   0.102208 83.116344   3.869 0.000217 ***
## condother:siteUSA -0.105228   0.074389 82.717969  -1.415 0.160948    
## condself:siteUSA  -0.003991   0.085254 82.786184  -0.047 0.962771    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.281                              
## condself    -0.249  0.540                       
## siteUSA     -0.728  0.204  0.181                
## cndthr:sUSA  0.204 -0.728 -0.393 -0.281         
## cndslf:sUSA  0.181 -0.392 -0.727 -0.250  0.540

combined plot

# Model-predicted ROI activity by condition and site for both H4 models,
# stacked and labeled; condition and ROI factor levels set for plotting
predicted_h4 = ggeffects::ggpredict(mod_h4a, c("cond", "site")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond", "site")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Trial-level ROI activity (deduplicated), renamed to match predicted_h4
ind_data_h4 = merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(site, pID, cond, run, trial, atlas, parameter_estimate_std) %>%
  unique() %>%
  rename("x" = cond,
         "predicted" = parameter_estimate_std,
         "group" = site) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Plot predicted ROI activity by condition and site: faint per-participant
# mean lines under group means with pointranges (95% CI), faceted by ROI;
# outer parentheses print the plot on assignment
(plot_h4 = predicted_h4 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = ind_data_h4, aes(group = pID), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_sample) +
  labs(x = "", y = "ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H5

Are the effects of the experimental manipulations on sharing intentions moderated by cultural context?

These data are not consistent with moderation by cultural context.

# H5 moderation: does site moderate the effect of condition on
# sharing intentions? (random intercepts only)
mod_h5 = lmer(value ~ cond * site + (1 | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# Model-predicted sharing intentions by condition and site
predicted_h5 = ggeffects::ggpredict(mod_h5, c("cond", "site")) %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Trial-level sharing intentions, renamed to match predicted_h5 columns
ind_data_h5 = merged_wide %>%
  rename("x" = cond,
         "predicted" = value,
         "group" = site) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))
  
# Plot: faint per-participant mean lines under group means with
# pointranges (95% CI), colored by site
predicted_h5 %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = ind_data_h5, aes(group = pID), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  scale_color_manual(name = "", values = palette_sample) +
  labs(x = "", y = "predicted sharing intention\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

model table

# Tidy the fixed effects without printing, then render a styled table
table_h5 = table_model(mod_h5, print = FALSE)

kableExtra::kable_styling(kable(table_h5))
term b [95% CI] df t p
intercept 2.61 [2.49, 2.73] 124.62 43.05 < .001
other -0.01 [-0.10, 0.08] 5852.58 -0.25 .805
self -0.05 [-0.14, 0.04] 5852.49 -1.09 .276
sample (USA) 0.06 [-0.11, 0.22] 124.28 0.71 .481
other x sample (USA) -0.04 [-0.16, 0.08] 5852.46 -0.63 .527
self x sample (USA) 0.01 [-0.11, 0.13] 5852.53 0.12 .901

simple slopes

simple_slopes(mod_h5, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands -0.01 [-0.10, 0.08]
other - control USA -0.05 [-0.13, 0.03]
self - control Netherlands -0.05 [-0.14, 0.04]
self - control USA -0.04 [-0.12, 0.04]

summary

# Full model output for H5 (REML fit; Satterthwaite df via lmerTest)
summary(mod_h5)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ cond * site + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16699.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5646 -0.7036  0.1165  0.7257  2.0359 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1069   0.3270  
##  Residual             0.9402   0.9696  
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##                      Estimate  Std. Error          df t value
## (Intercept)          2.614373    0.060722  124.615368  43.055
## condother           -0.011139    0.045032 5852.575167  -0.247
## condself            -0.049002    0.044982 5852.491609  -1.089
## siteUSA              0.058931    0.083398  124.278598   0.707
## condother:siteUSA   -0.039086    0.061735 5852.461991  -0.633
## condself:siteUSA     0.007698    0.061756 5852.527922   0.125
##                              Pr(>|t|)    
## (Intercept)       <0.0000000000000002 ***
## condother                       0.805    
## condself                        0.276    
## siteUSA                         0.481    
## condother:siteUSA               0.527    
## condself:siteUSA                0.901    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.371                              
## condself    -0.371  0.500                       
## siteUSA     -0.728  0.270  0.270                
## cndthr:sUSA  0.270 -0.729 -0.365 -0.370         
## cndslf:sUSA  0.270 -0.364 -0.728 -0.370  0.500

H6

Are the positive relationships between ROI activity and sharing intentions moderated by cultural context?

self-referential ROI

These data are not consistent with moderation by cultural context.

# H6a: does the self-referential ROI slope on sharing intentions vary by site?
# Random intercept and random ROI slope per participant.
mod_h6a <- lmer(
  value ~ self_referential * site + (1 + self_referential | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# Tidy fixed-effects table for H6a (raw data frame for reuse below)
table_h6a <- table_model(mod_h6a, print = FALSE)

table_h6a %>%
  kable() %>%
  kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.60 [2.49, 2.71] 82.87 46.78 < .001
self-referential 0.06 [0.02, 0.10] 82.75 3.08 .003
sample (USA) 0.01 [-0.14, 0.16] 83.86 0.10 .917
self-referential x sample (USA) 0.04 [-0.01, 0.09] 81.29 1.52 .132

simple slopes

# Self-referential ROI slopes estimated separately within each site
simple_slopes(model = mod_h6a, var = "self_referential", moderator = "site",
              continuous = TRUE)
site b [95% CI]
Netherlands 0.06 [0.02, 0.10]
USA 0.10 [0.07, 0.14]

summary

# Full model output for H6a (REML fit; Satterthwaite df via lmerTest)
summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential * site + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16649
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6105 -0.7254  0.1143  0.7412  2.3037 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.109783 0.33133       
##           self_referential 0.002096 0.04578  -0.24
##  Residual                  0.930610 0.96468       
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##                           Estimate Std. Error        df t value
## (Intercept)               2.599392   0.055563 82.867138  46.783
## self_referential          0.060585   0.019676 82.749266   3.079
## siteUSA                   0.007974   0.076602 83.861120   0.104
## self_referential:siteUSA  0.040969   0.026956 81.291545   1.520
##                                      Pr(>|t|)    
## (Intercept)              < 0.0000000000000002 ***
## self_referential                      0.00282 ** 
## siteUSA                               0.91735    
## self_referential:siteUSA              0.13243    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf sitUSA
## self_rfrntl -0.059              
## siteUSA     -0.725  0.043       
## slf_rfr:USA  0.043 -0.730 -0.120

mentalizing ROI

These data are not consistent with moderation by cultural context.

# H6b: does the mentalizing ROI slope on sharing intentions vary by site?
# Random intercept and random ROI slope per participant.
mod_h6b <- lmer(
  value ~ mentalizing * site + (1 + mentalizing | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# Tidy fixed-effects table for H6b (raw data frame for reuse below)
table_h6b <- table_model(mod_h6b, print = FALSE)

table_h6b %>%
  kable() %>%
  kableExtra::kable_styling()
term b [95% CI] df t p
intercept 2.58 [2.47, 2.69] 82.50 46.49 < .001
mentalizing 0.06 [0.02, 0.10] 82.42 3.08 .003
sample (USA) 0.01 [-0.14, 0.17] 84.59 0.18 .860
mentalizing x sample (USA) 0.02 [-0.03, 0.08] 81.35 0.88 .383

simple slopes

# Mentalizing ROI slopes estimated separately within each site
simple_slopes(model = mod_h6b, var = "mentalizing", moderator = "site",
              continuous = TRUE)
site b [95% CI]
Netherlands 0.06 [0.02, 0.10]
USA 0.08 [0.05, 0.12]

summary

# Full model output for H6b (REML fit; Satterthwaite df via lmerTest)
summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing * site + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16661.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5901 -0.7255  0.1190  0.7413  2.2299 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.109376 0.33072       
##           mentalizing 0.001915 0.04377  -0.12
##  Residual             0.932679 0.96575       
## Number of obs: 5941, groups:  pID, 85
## 
## Fixed effects:
##                     Estimate Std. Error       df t value             Pr(>|t|)
## (Intercept)          2.58331    0.05556 82.49758  46.493 < 0.0000000000000002
## mentalizing          0.06021    0.01955 82.42216   3.080              0.00281
## siteUSA              0.01364    0.07688 84.59283   0.177              0.85963
## mentalizing:siteUSA  0.02352    0.02682 81.35212   0.877              0.38297
##                        
## (Intercept)         ***
## mentalizing         ** 
## siteUSA                
## mentalizing:siteUSA    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn sitUSA
## mentalizing -0.096              
## siteUSA     -0.723  0.069       
## mntlzng:USA  0.070 -0.729 -0.146

combined plot

# Grid of standardized ROI activity values over which to generate predictions
# (referenced by name inside the ggpredict term strings below)
vals <- seq(-4.5, 4.5, .1)

# Stack predictions from both ROI models into one frame for faceting
predicted_h6 <- bind_rows(
  ggeffects::ggpredict(mod_h6a, c("self_referential [vals]", "site")) %>%
    data.frame() %>%
    mutate(atlas = "self-referential"),
  ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]", "site")) %>%
    data.frame() %>%
    mutate(atlas = "mentalizing")
) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Trial-level data for both ROIs, renamed to match the prediction columns
ind_data_h6 <- merged %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  select(site, pID, cond, run, trial, atlas, parameter_estimate_std, value) %>%
  rename(x = parameter_estimate_std,
         predicted = value,
         group = site) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Faint lines = per-participant OLS fits; thick lines + ribbons = model
# predictions with 95% CIs, faceted by ROI
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = group, fill = group)) +
  stat_smooth(data = ind_data_h6, aes(group = interaction(pID, group)),
              geom = 'line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_sample) +
  scale_fill_manual(name = "", values = palette_sample) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "top")



combined table

# Combine every hypothesis table into one display table, labeling each
# block of rows with its hypothesis/DV. The list names become the DV column.
hypothesis_tables <- list(
  "H1a: Self-relevance"       = table_h1a,
  "H1b: Social relevance"     = table_h1b,
  "H2a: Self-relevance"       = table_h2a,
  "H2b: Social relevance"     = table_h2b,
  "H3a-b: Sharing intention"  = table_h3,
  "H4a: Self-referential ROI" = table_h4a,
  "H4b: Mentalizing ROI"      = table_h4b,
  "H5: Sharing intention"     = table_h5,
  "H6a: Sharing intention"    = table_h6a,
  "H6b: Sharing intention"    = table_h6b
)

purrr::imap_dfr(hypothesis_tables, ~ mutate(.x, DV = .y)) %>%
  select(DV, everything()) %>%
  kable() %>%
  kable_styling()
DV term b [95% CI] df t p
H1a: Self-relevance intercept 2.61 [2.50, 2.72] 82.71 46.34 < .001
H1a: Self-relevance self-referential 0.04 [0.01, 0.08] 84.72 2.36 .021
H1a: Self-relevance sample (USA) -0.09 [-0.25, 0.06] 83.71 -1.20 .233
H1a: Self-relevance self-referential x sample (USA) 0.01 [-0.04, 0.06] 83.24 0.37 .710
H1b: Social relevance intercept 2.72 [2.61, 2.84] 81.96 45.54 < .001
H1b: Social relevance mentalizing 0.05 [0.01, 0.08] 83.43 2.63 .010
H1b: Social relevance sample (USA) -0.13 [-0.30, 0.03] 83.52 -1.59 .116
H1b: Social relevance mentalizing x sample (USA) 0.01 [-0.04, 0.05] 82.93 0.22 .824
H2a: Self-relevance intercept 2.58 [2.46, 2.70] 121.05 42.02 < .001
H2a: Self-relevance other 0.04 [-0.05, 0.12] 5931.32 0.86 .389
H2a: Self-relevance self 0.04 [-0.05, 0.13] 5931.19 0.91 .364
H2a: Self-relevance sample (USA) -0.05 [-0.21, 0.12] 121.12 -0.57 .571
H2a: Self-relevance other x sample (USA) -0.06 [-0.18, 0.06] 5931.25 -0.97 .333
H2a: Self-relevance self x sample (USA) -0.01 [-0.13, 0.11] 5931.32 -0.21 .834
H2b: Social relevance intercept 2.73 [2.60, 2.85] 110.92 42.55 < .001
H2b: Social relevance other 0.02 [-0.06, 0.10] 5931.27 0.42 .678
H2b: Social relevance self 0.00 [-0.08, 0.08] 5931.17 0.03 .978
H2b: Social relevance sample (USA) -0.16 [-0.33, 0.02] 110.97 -1.78 .077
H2b: Social relevance other x sample (USA) 0.05 [-0.06, 0.16] 5931.21 0.97 .330
H2b: Social relevance self x sample (USA) 0.08 [-0.03, 0.19] 5931.27 1.50 .132
H3a-b: Sharing intention intercept 1.14 [0.95, 1.32] 83.43 12.02 < .001
H3a-b: Sharing intention self-relevance 0.32 [0.27, 0.38] 89.44 11.06 < .001
H3a-b: Sharing intention sample (USA) 0.08 [-0.18, 0.33] 77.04 0.60 .550
H3a-b: Sharing intention social relevance 0.23 [0.15, 0.31] 88.51 5.96 < .001
H3a-b: Sharing intention self-relevance x sample (USA) -0.03 [-0.11, 0.04] 84.57 -0.89 .378
H3a-b: Sharing intention sample (USA) x social relevance 0.04 [-0.06, 0.14] 82.52 0.79 .430
H4a: Self-referential ROI intercept -0.15 [-0.30, -0.00] 83.01 -2.03 .045
H4a: Self-referential ROI other 0.11 [-0.00, 0.22] 82.88 1.98 .051
H4a: Self-referential ROI self 0.09 [-0.04, 0.21] 82.59 1.39 .167
H4a: Self-referential ROI sample (USA) 0.43 [0.23, 0.64] 83.05 4.18 < .001
H4a: Self-referential ROI other x sample (USA) -0.04 [-0.20, 0.11] 82.88 -0.56 .579
H4a: Self-referential ROI self x sample (USA) 0.01 [-0.16, 0.18] 82.77 0.11 .911
H4b: Mentalizing ROI intercept 0.11 [-0.04, 0.26] 83.08 1.51 .134
H4b: Mentalizing ROI other 0.12 [0.01, 0.23] 82.72 2.21 .030
H4b: Mentalizing ROI self 0.08 [-0.04, 0.21] 82.59 1.32 .190
H4b: Mentalizing ROI sample (USA) 0.40 [0.19, 0.60] 83.12 3.87 < .001
H4b: Mentalizing ROI other x sample (USA) -0.11 [-0.25, 0.04] 82.72 -1.41 .161
H4b: Mentalizing ROI self x sample (USA) -0.00 [-0.17, 0.17] 82.79 -0.05 .963
H5: Sharing intention intercept 2.61 [2.49, 2.73] 124.62 43.05 < .001
H5: Sharing intention other -0.01 [-0.10, 0.08] 5852.58 -0.25 .805
H5: Sharing intention self -0.05 [-0.14, 0.04] 5852.49 -1.09 .276
H5: Sharing intention sample (USA) 0.06 [-0.11, 0.22] 124.28 0.71 .481
H5: Sharing intention other x sample (USA) -0.04 [-0.16, 0.08] 5852.46 -0.63 .527
H5: Sharing intention self x sample (USA) 0.01 [-0.11, 0.13] 5852.53 0.12 .901
H6a: Sharing intention intercept 2.60 [2.49, 2.71] 82.87 46.78 < .001
H6a: Sharing intention self-referential 0.06 [0.02, 0.10] 82.75 3.08 .003
H6a: Sharing intention sample (USA) 0.01 [-0.14, 0.16] 83.86 0.10 .917
H6a: Sharing intention self-referential x sample (USA) 0.04 [-0.01, 0.09] 81.29 1.52 .132
H6b: Sharing intention intercept 2.58 [2.47, 2.69] 82.50 46.49 < .001
H6b: Sharing intention mentalizing 0.06 [0.02, 0.10] 82.42 3.08 .003
H6b: Sharing intention sample (USA) 0.01 [-0.14, 0.17] 84.59 0.18 .860
H6b: Sharing intention mentalizing x sample (USA) 0.02 [-0.03, 0.08] 81.35 0.88 .383

cite packages

# Print citations for all packages loaded in this session
report::cite_packages()
##   - Angelo Canty and Brian Ripley (2021). boot: Bootstrap R (S-Plus) Functions. R package version 1.3-28.
##   - Douglas Bates and Martin Maechler (2021). Matrix: Sparse and Dense Matrix Classes and Methods. R package version 1.3-4. https://CRAN.R-project.org/package=Matrix
##   - Douglas Bates, Martin Maechler, Ben Bolker, Steve Walker (2015). Fitting Linear Mixed-Effects Models Using lme4. Journal of Statistical Software, 67(1), 1-48. doi:10.18637/jss.v067.i01.
##   - H. Wickham. ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York, 2016.
##   - Hadley Wickham (2019). stringr: Simple, Consistent Wrappers for Common String Operations. R package version 1.4.0. https://CRAN.R-project.org/package=stringr
##   - Hadley Wickham (2021). forcats: Tools for Working with Categorical Variables (Factors). R package version 0.5.1. https://CRAN.R-project.org/package=forcats
##   - Hadley Wickham and Maximilian Girlich (2022). tidyr: Tidy Messy Data. R package version 1.2.0. https://CRAN.R-project.org/package=tidyr
##   - Hadley Wickham, Jennifer Bryan and Malcolm Barrett (2021). usethis: Automate Package and Project Setup. R package version 2.1.5. https://CRAN.R-project.org/package=usethis
##   - Hadley Wickham, Jim Hester and Jennifer Bryan (2022). readr: Read Rectangular Text Data. R package version 2.1.2. https://CRAN.R-project.org/package=readr
##   - Hadley Wickham, Jim Hester, Winston Chang and Jennifer Bryan (2021). devtools: Tools to Make Developing R Packages Easier. R package version 2.4.3. https://CRAN.R-project.org/package=devtools
##   - Hadley Wickham, Romain François, Lionel Henry and Kirill Müller (2022). dplyr: A Grammar of Data Manipulation. R package version 1.0.9. https://CRAN.R-project.org/package=dplyr
##   - Hao Zhu (2021). kableExtra: Construct Complex Table with 'kable' and Pipe Syntax. R package version 1.3.4. https://CRAN.R-project.org/package=kableExtra
##   - Jim Hester, Hadley Wickham and Gábor Csárdi (2021). fs: Cross-Platform File System Operations Based on 'libuv'. R package version 1.5.2. https://CRAN.R-project.org/package=fs
##   - Kirill Müller and Hadley Wickham (2022). tibble: Simple Data Frames. R package version 3.1.8. https://CRAN.R-project.org/package=tibble
##   - Kuznetsova A, Brockhoff PB, Christensen RHB (2017). "lmerTest Package:Tests in Linear Mixed Effects Models." _Journal of StatisticalSoftware_, *82*(13), 1-26. doi: 10.18637/jss.v082.i13 (URL:https://doi.org/10.18637/jss.v082.i13).
##   - Lionel Henry and Hadley Wickham (2020). purrr: Functional Programming Tools. R package version 0.3.4. https://CRAN.R-project.org/package=purrr
##   - Lüdecke D (2018). "ggeffects: Tidy Data Frames of Marginal Effects fromRegression Models." _Journal of Open Source Software_, *3*(26), 772.doi: 10.21105/joss.00772 (URL: https://doi.org/10.21105/joss.00772).
##   - R Core Team (2021). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
##   - Rinker, T. W. & Kurkiewicz, D. (2017). pacman: Package Management for R. version 0.5.0. Buffalo, New York. http://github.com/trinker/pacman
##   - Wickham et al., (2019). Welcome to the tidyverse. Journal of Open Source Software, 4(43), 1686, https://doi.org/10.21105/joss.01686
##   - Yihui Xie (2021). knitr: A General-Purpose Package for Dynamic Report Generation in R. R package version 1.37.